#include <asm/hvm/hvm.h>
#include <asm/hvm/io.h>
#include <asm/hvm/support.h>
+#include <asm/hvm/vlapic.h>
#include <asm/hvm/svm/svm.h>
#include <asm/hvm/svm/intr.h>
#include <xen/event.h>
svm_inject_dummy_vintr(v);
}
+static void update_cr8_intercept(
+ struct vcpu *v, enum hvm_intack masked_intr_source)
+{
+ struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+ struct vlapic *vlapic = vcpu_vlapic(v);
+ int max_irr;
+
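+ /* Start by assuming that the guest's CR8 writes need not be intercepted. */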
+ vmcb->cr_intercepts &= ~CR_INTERCEPT_CR8_WRITE;
+
+ /*
+ * If ExtInts are masked then that dominates the TPR: the 'interrupt
+ * window' has already been enabled in this case, and it will get us a
+ * VMEXIT as soon as interrupts are deliverable again, so the CR8-write
+ * intercept is not needed.
+ */
+ if ( (masked_intr_source == hvm_intack_lapic) ||
+ (masked_intr_source == hvm_intack_pic) )
+ return;
+
+ /* Is there an interrupt pending at the LAPIC? Nothing to do if not. */
+ if ( !vlapic_enabled(vlapic) ||
+ ((max_irr = vlapic_find_highest_irr(vlapic)) == -1) )
+ return;
+
+ /* Highest-priority pending interrupt is masked by the TPR? */
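+ /*
+ * A vector is masked when its priority class (vector >> 4) does not exceed
+ * the TPR's class. If the top pending vector is masked, enable the
+ * CR8-write intercept so that the guest lowering its TPR triggers a
+ * re-evaluation of pending interrupts.
+ */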
+ if ( (vmcb->vintr.fields.tpr & 0xf) >= (max_irr >> 4) )
+ vmcb->cr_intercepts |= CR_INTERCEPT_CR8_WRITE;
+}
+
asmlinkage void svm_intr_assist(void)
{
struct vcpu *v = current;
do {
intr_source = hvm_vcpu_has_pending_irq(v);
if ( likely(intr_source == hvm_intack_none) )
- return;
+ goto out;
/*
* Pending IRQs must be delayed if:
!hvm_interrupts_enabled(v, intr_source) )
{
enable_intr_window(v, intr_source);
- return;
+ goto out;
}
} while ( !hvm_vcpu_ack_pending_irq(v, intr_source, &intr_vector) );
intr_source = hvm_vcpu_has_pending_irq(v);
if ( unlikely(intr_source != hvm_intack_none) )
enable_intr_window(v, intr_source);
+
+ out:
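+ /* Re-evaluate the CR8-write intercept for the new TPR/pending-IRQ state. */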
+ update_cr8_intercept(v, intr_source);
}
/*
eventinj_t eventinj;
int inst_len, rc;
+ /*
+ * Before doing anything else, we need to sync up the VLAPIC's TPR with
+ * SVM's vTPR if the CR8-write intercept is currently disabled: in that
+ * case the guest may have updated the vTPR without causing a VMEXIT.
+ * It's OK if the guest never touches CR8 (e.g. 32-bit Windows) because
+ * we update the vTPR on MMIO writes to the TPR.
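+ * The vTPR field holds only the 4-bit priority class, which corresponds
+ * to bits 7:4 of APIC_TASKPRI, hence the shift by 4 below.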
+ */
+ if ( !(vmcb->cr_intercepts & CR_INTERCEPT_CR8_WRITE) )
+ vlapic_set_reg(vcpu_vlapic(v), APIC_TASKPRI,
+ (vmcb->vintr.fields.tpr & 0x0F) << 4);
+
exit_reason = vmcb->exitcode;
HVMTRACE_2D(VMEXIT, v, vmcb->rip, exit_reason);
svm_asid_init_vcpu(v);
vmcb->general1_intercepts =
- GENERAL1_INTERCEPT_INTR | GENERAL1_INTERCEPT_NMI |
- GENERAL1_INTERCEPT_SMI | GENERAL1_INTERCEPT_INIT |
- GENERAL1_INTERCEPT_CPUID | GENERAL1_INTERCEPT_INVD |
- GENERAL1_INTERCEPT_HLT | GENERAL1_INTERCEPT_INVLPG |
- GENERAL1_INTERCEPT_INVLPGA | GENERAL1_INTERCEPT_IOIO_PROT |
- GENERAL1_INTERCEPT_MSR_PROT | GENERAL1_INTERCEPT_SHUTDOWN_EVT;
+ GENERAL1_INTERCEPT_INTR | GENERAL1_INTERCEPT_NMI |
+ GENERAL1_INTERCEPT_SMI | GENERAL1_INTERCEPT_INIT |
+ GENERAL1_INTERCEPT_CPUID | GENERAL1_INTERCEPT_INVD |
+ GENERAL1_INTERCEPT_HLT | GENERAL1_INTERCEPT_INVLPG |
+ GENERAL1_INTERCEPT_INVLPGA | GENERAL1_INTERCEPT_IOIO_PROT |
+ GENERAL1_INTERCEPT_MSR_PROT | GENERAL1_INTERCEPT_SHUTDOWN_EVT;
vmcb->general2_intercepts =
- GENERAL2_INTERCEPT_VMRUN | GENERAL2_INTERCEPT_VMMCALL |
- GENERAL2_INTERCEPT_VMLOAD | GENERAL2_INTERCEPT_VMSAVE |
- GENERAL2_INTERCEPT_STGI | GENERAL2_INTERCEPT_CLGI |
- GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_RDTSCP;
+ GENERAL2_INTERCEPT_VMRUN | GENERAL2_INTERCEPT_VMMCALL |
+ GENERAL2_INTERCEPT_VMLOAD | GENERAL2_INTERCEPT_VMSAVE |
+ GENERAL2_INTERCEPT_STGI | GENERAL2_INTERCEPT_CLGI |
+ GENERAL2_INTERCEPT_SKINIT | GENERAL2_INTERCEPT_RDTSCP;
/* Intercept all debug-register writes. */
vmcb->dr_intercepts = DR_INTERCEPT_ALL_WRITES;
- /* Intercept all control-register accesses, except to CR2. */
- vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ | CR_INTERCEPT_CR2_WRITE);
+ /*
+ * Intercept all control-register accesses except CR2 reads/writes and
+ * CR8 reads. CR8 writes are intercepted by default as well, but that
+ * intercept is enabled/disabled dynamically in svm/intr.c
+ * (update_cr8_intercept()) according to the guest's TPR and the
+ * highest-priority pending interrupt.
+ */
+ vmcb->cr_intercepts = ~(CR_INTERCEPT_CR2_READ |
+ CR_INTERCEPT_CR2_WRITE |
+ CR_INTERCEPT_CR8_READ);
/* I/O and MSR permission bitmaps. */
arch_svm->msrpm = alloc_xenheap_pages(get_order_from_bytes(MSRPM_SIZE));